static inline int may_switch_mode(struct domain *d)
{
- return 1; /* XXX */
+ /* A mode switch rewrites per-vcpu page-table layout, so it is only
+  * safe while the domain holds no memory pages at all. */
+ return (d->tot_pages == 0);
}
int switch_native(struct domain *d)
l1_pgentry_t gdt_l1e;
unsigned int vcpuid;
- if ( !d )
+ if ( d == NULL )
return -EINVAL;
if ( !may_switch_mode(d) )
return -EACCES;
release_compat_l4(d->vcpu[vcpuid]);
}
+ d->arch.physaddr_bitsize = 64;
+
return 0;
}
l1_pgentry_t gdt_l1e;
unsigned int vcpuid;
- if ( !d )
+ if ( d == NULL )
return -EINVAL;
if ( compat_disabled )
return -ENOSYS;
return -ENOMEM;
}
+ /* Compat M2P window: each 4-byte entry maps one page, so the guest can
+  * address log2(window bytes) + PAGE_SHIFT - 2 bits of physical space.
+  * (PAGE_SIZE here was a bug: it added ~4094 to a bit count.) */
+ d->arch.physaddr_bitsize =
+ fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
+ + (PAGE_SHIFT - 2);
+
return 0;
}
value = (parms.virt_hv_start_low + mask) & ~mask;
#ifdef CONFIG_COMPAT
- HYPERVISOR_COMPAT_VIRT_START(d) = max_t(unsigned int, m2p_compat_vstart, value);
+ HYPERVISOR_COMPAT_VIRT_START(d) =
+ max_t(unsigned int, m2p_compat_vstart, value);
+ /* Native guests see the full 64-bit space; compat guests are limited by
+  * the M2P window (4-byte entries, one per page): log2(window bytes)
+  * + PAGE_SHIFT - 2 bits.  PAGE_SIZE was mistakenly used for PAGE_SHIFT. */
+ d->arch.physaddr_bitsize = !IS_COMPAT(d) ? 64 :
+ fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
+ + (PAGE_SHIFT - 2);
if ( value > (!IS_COMPAT(d) ?
HYPERVISOR_VIRT_START :
__HYPERVISOR_COMPAT_VIRT_START) )
return 0;
}
+/* Clamp an allocation's address-width request to what @d can address.
+ * @d may be NULL (anonymous/Xen allocations), in which case no clamp
+ * applies.  NOTE(review): relies on d->arch.physaddr_bitsize having been
+ * initialized (64 for native guests) on every domain-creation path before
+ * the first clamped allocation — an unset (zero) value would clamp every
+ * request to 0 bits; verify all creation paths set it. */
+unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits)
+{
+ if ( d == NULL )
+ return bits;
+ return min(d->arch.physaddr_bitsize, bits);
+}
+
#include "compat/mm.c"
/*
struct page_info *pg = NULL;
cpumask_t mask;
unsigned long i;
- unsigned int bits = memflags >> _MEMF_bits, zone_hi;
+ unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
ASSERT(!in_irq());
- if ( bits && bits <= PAGE_SHIFT + 1 )
- return NULL;
-
- zone_hi = bits - PAGE_SHIFT - 1;
- if ( zone_hi >= NR_ZONES )
- zone_hi = NR_ZONES - 1;
+ if ( bits )
+ {
+ bits = domain_clamp_alloc_bitsize(d, bits);
+ if ( bits <= (PAGE_SHIFT + 1) )
+ return NULL;
+ bits -= PAGE_SHIFT + 1;
+ if ( bits < zone_hi )
+ zone_hi = bits;
+ }
- if ( NR_ZONES + PAGE_SHIFT > dma_bitsize &&
- (!bits || bits > dma_bitsize) )
+ if ( (zone_hi + PAGE_SHIFT) >= dma_bitsize )
{
pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, cpu, order);
int steal_page(
struct domain *d, struct page_info *page, unsigned int memflags);
+#define domain_clamp_alloc_bitsize(d, b) (b)
+
#endif /* __ASM_IA64_MM_H__ */
/* XXX these just exist until we can stop #including x86 code */
#define access_ok(addr,size) 1
#define array_access_ok(addr,count,size) 1
+
+#define domain_clamp_alloc_bitsize(d, b) (b)
+
#endif
/* Pseudophysical e820 map (XENMEM_memory_map). */
struct e820entry e820[3];
unsigned int nr_e820;
+
+ /* Maximum physical-address bitwidth supported by this guest. */
+ unsigned int physaddr_bitsize;
} __cacheline_aligned;
#ifdef CONFIG_X86_PAE
#ifdef CONFIG_COMPAT
int setup_arg_xlat_area(struct vcpu *, l4_pgentry_t *);
+unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);
#else
# define setup_arg_xlat_area(vcpu, l4tab) 0
+# define domain_clamp_alloc_bitsize(d, b) (b)
#endif
+
#endif /* __ASM_X86_MM_H__ */